40f56238nWMQg7CKbyTy0KJNvCzbtg linux-2.6.10-xen-sparse/arch/xen/i386/kernel/signal.c
41811cac4lkCB-fHir6CcxuEJ2pGsQ linux-2.6.10-xen-sparse/arch/xen/i386/kernel/smp.c
41811ca9mbGpqBrZVrUGEiv8CTV3ng linux-2.6.10-xen-sparse/arch/xen/i386/kernel/smpboot.c
+42308df8u332Gs7XX-jX4gsfFU2zOQ linux-2.6.10-xen-sparse/arch/xen/i386/kernel/syscall_stats.c
40f56238qVGkpO_ycnQA8k03kQzAgA linux-2.6.10-xen-sparse/arch/xen/i386/kernel/time.c
40f56238NzTgeO63RGoxHrW5NQeO3Q linux-2.6.10-xen-sparse/arch/xen/i386/kernel/timers/Makefile
40f56238BMqG5PuSHufpjbvp_helBw linux-2.6.10-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
40c9c469kT0H9COWzA4XzPBjWK0WsA tools/misc/netfix
4022a73cEKvrYe_DVZW2JlAxobg9wg tools/misc/nsplitd/Makefile
4022a73cKms4Oq030x2JBzUB426lAQ tools/misc/nsplitd/nsplitd.c
+42308df9dv_ZuP49nNPIROEMQ3F_LA tools/misc/xc_shadow.c
3f5ef5a2ir1kVAthS14Dc5QIRCEFWg tools/misc/xen-clone
3f5ef5a2dTZP0nnsFoeq2jRf3mWDDg tools/misc/xen-clone.README
405eedf6_nnNhFQ1I85lhCkLK6jFGA tools/misc/xencons
space. Odds are that you want to say N here.
config XEN_WRITABLE_PAGETABLES
- bool
+ bool "writable page tables"
default y
+config XEN_SYSCALL_STATS
+ bool "system call statistics"
+ default n
+
+config XEN_DEBUG_NO_MMU_BATCHING
+ bool "Disables batching on MMU updates"
+ default n
+ help
+   This does one hypercall per PTE update.
+   We only use this for benchmarking;
+   enable it only if you know what you are doing.
+
+config XEN_BATCH_MODE1
+ bool "A variant of writable pagetable using the batch interface"
+ default n
+ help
+   Default is no batching, with minor modifications for some batching.
+   We only use this for benchmarking;
+   enable it only if you know what you are doing.
+
+config XEN_BATCH_MODE2
+ bool "forward port of 2.4 batching"
+ default n
+ help
+   Default is batching, plus flushes where 2.4 had them.
+   We only use this for benchmarking;
+   enable it only if you know what you are doing.
+
+
config XEN_SCRUB_PAGES
bool "Scrub memory before freeing it to Xen"
default y
c-obj-$(CONFIG_EFI) += efi.o efi_stub.o
c-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+c-obj-$(CONFIG_XEN_SYSCALL_STATS) += syscall_stats.o
+
EXTRA_AFLAGS := -traditional
c-obj-$(CONFIG_SCx200) += scx200.o
jnz syscall_trace_entry
cmpl $(nr_syscalls), %eax
jae syscall_badsys
- call *sys_call_table(,%eax,4)
+#ifdef CONFIG_XEN_SYSCALL_STATS
+ lock incl syscall_stats(,%eax,4)
+#endif
+ call *sys_call_table(,%eax,4)
movl %eax,EAX(%esp)
cli
movl TI_flags(%ebp), %ecx
cmpl $(nr_syscalls), %eax
jae syscall_badsys
syscall_call:
- call *sys_call_table(,%eax,4)
+#ifdef CONFIG_XEN_SYSCALL_STATS
+ lock incl syscall_stats(,%eax,4)
+#endif
+ call *sys_call_table(,%eax,4)
movl %eax,EAX(%esp) # store the return value
syscall_exit:
XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
--- /dev/null
+/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
+ ****************************************************************************
+ * (C) 2005 - Rolf Neugebauer - Intel Research Cambridge
+ ****************************************************************************
+ *
+ * File: syscall_stats.c
+ * Author: Rolf Neugebauer (rolf.neugebauer@intel.com)
+ * Date: Mar 2005
+ *
+ * Description: add a proc interface to get per system call stats
+ */
+
+
+#include <linux/config.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/unistd.h>
+
+/* One counter per syscall number, incremented ("lock incl") from the
+ * system-call entry path in entry.S under CONFIG_XEN_SYSCALL_STATS. */
+unsigned long syscall_stats[NR_syscalls];
+/* Dummy non-NULL cookie returned by c_start(); never dereferenced. */
+static unsigned char foobar[4];
+
+/* Any write to the proc file just resets the counters; the written
+ * data, size and offset are ignored, and the whole write is reported
+ * as consumed so callers do not retry. */
+static ssize_t syscall_write(struct file *f, const char *data,
+ size_t size, loff_t *pos)
+{
+ printk("resetting syscall stats\n");
+ memset(&syscall_stats, 0, sizeof(syscall_stats));
+ return size;
+}
+
+/* seq_file show: emit every per-syscall counter on one line, space
+ * separated, in syscall-number order.  Counters are read while the
+ * entry path keeps incrementing them, so the snapshot is approximate. */
+static int show_syscall(struct seq_file *m, void *v)
+{
+ int i;
+ for ( i=0; i<NR_syscalls; i++ )
+ {
+ seq_printf(m, "%lu ", syscall_stats[i]);
+ }
+ seq_printf(m, "\n");
+ return 0;
+}
+
+/* seq_file start: the whole table is emitted as a single record, so
+ * return some non-NULL cookie at position 0 and NULL (end of sequence)
+ * otherwise.  "foobar" only needs a distinct address. */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos == 0 ? foobar : NULL;
+}
+
+/* seq_file next: there is only one record, so advancing the position
+ * always ends the sequence (c_start returns NULL for *pos != 0). */
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+/* seq_file stop: nothing to release -- no locks or allocations are
+ * held across the sequence. */
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+/* seq_file iterator for /proc/syscalls: one start/show/stop cycle
+ * prints the whole counter table. */
+static struct seq_operations syscall_op = {
+    .start = c_start,
+    .next  = c_next,
+    .stop  = c_stop,
+    .show  = show_syscall,
+};
+
+/* Attach the seq_file iterator on open; seq_read/seq_lseek then drive
+ * the c_start/show_syscall cycle. */
+static int syscall_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &syscall_op);
+}
+
+/* /proc/syscalls: reads stream the counters via seq_file; any write
+ * resets them (syscall_write). */
+static struct file_operations proc_syscall_operations = {
+    .open    = syscall_open,
+    .read    = seq_read,
+    .write   = syscall_write,
+    .llseek  = seq_lseek,
+    .release = seq_release,
+};
+
+
+static struct proc_dir_entry *entry;
+
+/* Register /proc/syscalls at boot.  Returns 0 even on failure: the
+ * stats interface is optional and must not abort the boot. */
+static int __init syscall_stats_init(void)
+{
+    printk("Initialising syscall stats.\n");
+
+    /* 0644: anyone may read the stats, only root may reset them.
+     * The execute bits of the previous 0777 were meaningless on a
+     * proc entry, and world-write let any user clear the counters. */
+    entry = create_proc_entry("syscalls", 0644, NULL);
+    if (entry)
+        entry->proc_fops = &proc_syscall_operations;
+    else
+        printk("Unable to create /proc/syscalls.\n");
+    return 0;
+}
+subsys_initcall(syscall_stats_init);
per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
per_cpu(update_queue[idx], cpu).val = val;
increment_index();
+#ifdef CONFIG_XEN_DEBUG_NO_MMU_BATCHING
+ __flush_page_update_queue();
+#endif
spin_unlock_irqrestore(&update_lock, flags);
}
#endif
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)
#else
+#if defined(CONFIG_XEN_DEBUG_NO_MMU_BATCHING)
+/* Queue the PTE write; the queue is flushed per update elsewhere. */
+#define set_pte(pteptr, pteval)\
+ set_pte_batched(pteptr, pteval)
+#elif defined(CONFIG_XEN_BATCH_MODE)
+/* NOTE(review): no CONFIG_XEN_BATCH_MODE (without a 1/2 suffix) is
+ * defined by this changeset's Kconfig additions -- only
+ * XEN_BATCH_MODE1 and XEN_BATCH_MODE2 exist.  Confirm whether this
+ * should be CONFIG_XEN_BATCH_MODE1; otherwise this branch is dead. */
+#define set_pte(pteptr, pteval)({\
+ set_pte_batched(pteptr, pteval);\
+ _flush_page_update_queue();})
+#else
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#endif
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
#endif
/*
* dst->page_table_lock is held on entry and exit,
* but may be dropped within pmd_alloc() and pte_alloc_map().
*/
+#ifdef CONFIG_XEN_BATCH_MODE1
+#undef set_pte
+/* Batch PTE writes; copy_page_range flushes once per PMD span.
+ * No trailing semicolon: the expansion must be a single statement so
+ * "if (x) set_pte(...); else ..." remains well-formed. */
+#define set_pte(pteptr, pteval) \
+    set_pte_batched(pteptr, pteval)
+#endif
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
cond_resched_lock(&dst->page_table_lock);
cont_copy_pmd_range:
src_pmd++;
- dst_pmd++;
+ dst_pmd++;
} while ((unsigned long)src_pmd & PMD_TABLE_MASK);
+#ifdef CONFIG_XEN_BATCH_MODE1
+ _flush_page_update_queue();
+#endif
}
out_unlock:
spin_unlock(&src->page_table_lock);
free_swap_and_cache(pte_to_swp_entry(pte));
pte_clear(ptep);
}
+#ifdef CONFIG_XEN_BATCH_MODE1
+ _flush_page_update_queue();
+#endif
pte_unmap(ptep-1);
}
+#ifdef CONFIG_XEN_BATCH_MODE1
+#undef set_pte
+#define set_pte(pteptr, pteval)\
+ set_pte_batched(pteptr, pteval);\
+ _flush_page_update_queue()
+#endif
+
static void zap_pmd_range(struct mmu_gather *tlb,
pgd_t * dir, unsigned long address,
unsigned long size, struct zap_details *details)
EXPORT_SYMBOL(get_user_pages);
+#ifdef CONFIG_XEN_BATCH_MODE1
+#undef set_pte
+/* Batch PTE writes; zeromap_pte_range flushes once per PTE page.
+ * No trailing semicolon so the macro expands to a single statement
+ * (a trailing ";" breaks "if (x) set_pte(...); else ..."). */
+#define set_pte(pteptr, pteval) \
+    set_pte_batched(pteptr, pteval)
+#endif
static void zeromap_pte_range(pte_t * pte, unsigned long address,
unsigned long size, pgprot_t prot)
{
address += PAGE_SIZE;
pte++;
} while (address && (address < end));
+
+#ifdef CONFIG_XEN_BATCH_MODE1
+ _flush_page_update_queue();
+#endif
+
}
+#ifdef CONFIG_XEN_BATCH_MODE1
+#undef set_pte
+#define set_pte(pteptr, pteval)\
+ set_pte_batched(pteptr, pteval);\
+ _flush_page_update_queue()
+#endif
static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
unsigned long size, pgprot_t prot)
* mappings are removed. any references to nonexistent pages results
* in null mappings (currently treated as "copy-on-access")
*/
+#ifdef CONFIG_XEN_BATCH_MODE1
+#undef set_pte
+/* Batch PTE writes; remap_pte_range flushes once per range.
+ * No trailing semicolon so the macro expands to a single statement. */
+#define set_pte(pteptr, pteval) \
+    set_pte_batched(pteptr, pteval)
+#endif
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
unsigned long pfn, pgprot_t prot)
{
pfn++;
pte++;
} while (address && (address < end));
+#ifdef CONFIG_XEN_BATCH_MODE1
+ _flush_page_update_queue();
+#endif
}
+#ifdef CONFIG_XEN_BATCH_MODE1
+#undef set_pte
+#define set_pte(pteptr, pteval)\
+ set_pte_batched(pteptr, pteval);\
+ _flush_page_update_queue()
+#endif
static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
unsigned long pfn, pgprot_t prot)
HDRS = $(wildcard *.h)
-TARGETS = xenperf
+TARGETS = xenperf xc_shadow
INSTALL_BIN = $(TARGETS) xencons
INSTALL_SBIN = netfix xm xend xensv xenperf
while((cpu_mask&1)) {
int i;
for (i=0x300;i<0x312;i++) {
- printf("%010llx ",cpus_rdmsr( cpu_mask, i ) );
+ printf("%010llu ",cpus_rdmsr( cpu_mask, i ) );
}
printf("\n");
cpu_mask>>=1;
--- /dev/null
+/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
+ ****************************************************************************
+ * (C) 2005 - Rolf Neugebauer - Intel Research Cambridge
+ ****************************************************************************
+ *
+ * File: xc_shadow.c
+ * Author: Rolf Neugebauer (rolf.neugebauer@intel.com)
+ * Date: Mar 2005
+ *
+ * Description: command-line tool that sets a domain's shadow
+ *              page-table mode via xc_shadow_control().
+ */
+
+
+#include <xc.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <string.h>
+
+/* Print usage to stderr and terminate with a failure status: being
+ * invoked with bad arguments is an error, so do not exit(0) or write
+ * the message to stdout. */
+void usage(void)
+{
+    fprintf(stderr, "xc_shadow: -[0|1|2]\n");
+    fprintf(stderr, "  set shadow mode\n");
+    exit(1);
+}
+
+int main(int argc, char *argv[])
+{
+    int xc_handle;
+    int mode = 0;     /* defensively initialised; usage() never returns */
+
+    /* Exactly one argument of the form -0, -1 or -2 selects the mode. */
+    if ( argc > 1 )
+    {
+        char *p = argv[1];
+        if ( *p++ == '-' )
+        {
+            if ( *p == '1' )
+                mode = 1;
+            else if ( *p == '2' )
+                mode = 2;
+            else if ( *p == '0' )
+                mode = 0;
+            else
+                usage();
+        }
+        else
+            usage();
+    }
+    else
+        usage();
+
+    if ( (xc_handle = xc_interface_open()) == -1 )
+    {
+        fprintf(stderr, "Error opening xc interface: %d (%s)\n",
+                errno, strerror(errno));
+        return 1;
+    }
+
+    if ( xc_shadow_control(xc_handle,
+                           0,
+                           mode,
+                           NULL,
+                           0,
+                           NULL) < 0 )
+    {
+        /* Was "Error reseting performance counters" -- a typo'd
+         * copy-and-paste from xenperf; this call sets shadow mode. */
+        fprintf(stderr, "Error setting shadow mode: %d (%s)\n",
+                errno, strerror(errno));
+        return 1;
+    }
+    return 0;
+}